xen: Remove {nmi_pending,nmi_masked,paused} vcpu bitflags.
author    kfraser@localhost.localdomain <kfraser@localhost.localdomain>
          Thu, 29 Mar 2007 17:07:33 +0000 (18:07 +0100)
committer kfraser@localhost.localdomain <kfraser@localhost.localdomain>
          Thu, 29 Mar 2007 17:07:33 +0000 (18:07 +0100)
Signed-off-by: Keir Fraser <keir@xensource.com>
13 files changed:
xen/arch/x86/nmi.c
xen/arch/x86/traps.c
xen/arch/x86/x86_32/asm-offsets.c
xen/arch/x86/x86_32/entry.S
xen/arch/x86/x86_32/traps.c
xen/arch/x86/x86_64/asm-offsets.c
xen/arch/x86/x86_64/compat/entry.S
xen/arch/x86/x86_64/compat/traps.c
xen/arch/x86/x86_64/entry.S
xen/arch/x86/x86_64/traps.c
xen/common/domain.c
xen/common/kernel.c
xen/include/xen/sched.h

index 4d7fdff3c6cf8a0e8882bbdfa3ba897679a09f95..f226e90a128ff00795f9a325e59c2db3c17c8594 100644 (file)
@@ -445,18 +445,18 @@ static void do_nmi_stats(unsigned char key)
     int i;
     struct domain *d;
     struct vcpu *v;
+
     printk("CPU\tNMI\n");
-    for_each_cpu(i)
+    for_each_cpu ( i )
         printk("%3d\t%3d\n", i, nmi_count(i));
 
-    if ((d = dom0) == NULL)
-        return;
-    if ((v = d->vcpu[0]) == NULL)
+    if ( ((d = dom0) == NULL) || ((v = d->vcpu[0]) == NULL) )
         return;
-    if (v->vcpu_flags & (VCPUF_nmi_pending|VCPUF_nmi_masked))
+
+    if ( v->nmi_pending || v->nmi_masked )
         printk("dom0 vcpu0: NMI %s%s\n",
-               v->vcpu_flags & VCPUF_nmi_pending ? "pending " : "",
-               v->vcpu_flags & VCPUF_nmi_masked ? "masked " : "");
+               v->nmi_pending ? "pending " : "",
+               v->nmi_masked  ? "masked " : "");
     else
         printk("dom0 vcpu0: NMI neither pending nor masked\n");
 }
index 67e8ca0b6d03d01bf7e72eab51308781c23c7235..e1adce3b9eb919055122ba46494e37f6d7961edc 100644 (file)
@@ -1859,7 +1859,7 @@ static void nmi_dom0_report(unsigned int reason_idx)
 
     set_bit(reason_idx, nmi_reason(d));
 
-    if ( !test_and_set_bit(_VCPUF_nmi_pending, &v->vcpu_flags) )
+    if ( !xchg(&v->nmi_pending, 1) )
         raise_softirq(NMI_SOFTIRQ); /* not safe to wake up a vcpu here */
 }
 
index e092e9d473717e3b3dd388246452eab04eb1dbeb..4405bdb749a7aaafb678136a9e752d40a8253887 100644 (file)
@@ -68,8 +68,8 @@ void __dummy__(void)
     OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
     OFFSET(VCPU_flags, struct vcpu, vcpu_flags);
     OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
-    DEFINE(_VCPUF_nmi_pending, _VCPUF_nmi_pending);
-    DEFINE(_VCPUF_nmi_masked, _VCPUF_nmi_masked);
+    OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
+    OFFSET(VCPU_nmi_masked, struct vcpu, nmi_masked);
     DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
     BLANK();
 
index e2f5b22e99e8d398d5f92ee6af731a36114b8ee1..5f434af0addbef623d84bbdb6fac7a769c7bcf47 100644 (file)
@@ -232,8 +232,8 @@ test_all_events:
         shl  $IRQSTAT_shift,%eax
         test %ecx,irq_stat(%eax,1)
         jnz  process_softirqs
-        btr  $_VCPUF_nmi_pending,VCPU_flags(%ebx)
-        jc   process_nmi
+        testb $1,VCPU_nmi_pending(%ebx)
+        jnz  process_nmi
 test_guest_events:
         movl VCPU_vcpu_info(%ebx),%eax
         testb $0xFF,VCPUINFO_upcall_mask(%eax)
@@ -259,11 +259,13 @@ process_softirqs:
 
         ALIGN
 process_nmi:
+        testb $1,VCPU_nmi_masked(%ebx)
+        jnz  test_guest_events
+        movb $0,VCPU_nmi_pending(%ebx)
         movl VCPU_nmi_addr(%ebx),%eax
         test %eax,%eax
-        jz   test_all_events
-        bts  $_VCPUF_nmi_masked,VCPU_flags(%ebx)
-        jc   1f
+        jz   test_guest_events
+        movb $1,VCPU_nmi_masked(%ebx)
         sti
         leal VCPU_trap_bounce(%ebx),%edx
         movl %eax,TRAPBOUNCE_eip(%edx)
@@ -271,8 +273,6 @@ process_nmi:
         movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
         call create_bounce_frame
         jmp  test_all_events
-1:      bts  $_VCPUF_nmi_pending,VCPU_flags(%ebx)
-        jmp  test_guest_events
 
 bad_hypercall:
         movl $-ENOSYS,UREGS_eax(%esp)
index e6cd085962cd4cf1985d039900275b70fdf29fb2..fe22212dd460c7c2a9a438a42886c1d9eecd28fd 100644 (file)
@@ -218,7 +218,7 @@ unsigned long do_iret(void)
     }
 
     /* No longer in NMI context. */
-    clear_bit(_VCPUF_nmi_masked, &current->vcpu_flags);
+    current->nmi_masked = 0;
 
     /* Restore upcall mask from supplied EFLAGS.IF. */
     current->vcpu_info->evtchn_upcall_mask = !(eflags & X86_EFLAGS_IF);
index 4e4b1036558894b0f3c773434605be8ff0926db3..2c152b1da7c0ada1f9f2eb26161cd7ab36b11916 100644 (file)
@@ -77,8 +77,8 @@ void __dummy__(void)
     OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
     OFFSET(VCPU_flags, struct vcpu, vcpu_flags);
     OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
-    DEFINE(_VCPUF_nmi_pending, _VCPUF_nmi_pending);
-    DEFINE(_VCPUF_nmi_masked, _VCPUF_nmi_masked);
+    OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
+    OFFSET(VCPU_nmi_masked, struct vcpu, nmi_masked);
     DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
     DEFINE(_VGCF_syscall_disables_events,  _VGCF_syscall_disables_events);
     BLANK();
index dc44e6c6d0211258607e72d4bd43a2796041566e..ec254f7382ea85bb2c917a527029db1a3a9bd26f 100644 (file)
@@ -87,8 +87,8 @@ ENTRY(compat_test_all_events)
         leaq  irq_stat(%rip),%rcx
         testl $~0,(%rcx,%rax,1)
         jnz   compat_process_softirqs
-        btrq  $_VCPUF_nmi_pending,VCPU_flags(%rbx)
-        jc    compat_process_nmi
+        testb $1,VCPU_nmi_pending(%rbx)
+        jnz   compat_process_nmi
 compat_test_guest_events:
         movq  VCPU_vcpu_info(%rbx),%rax
         testb $0xFF,COMPAT_VCPUINFO_upcall_mask(%rax)
@@ -116,11 +116,13 @@ compat_process_softirqs:
        ALIGN
 /* %rbx: struct vcpu */
 compat_process_nmi:
+        testb $1,VCPU_nmi_masked(%rbx)
+        jnz   compat_test_guest_events
+        movb  $0,VCPU_nmi_pending(%rbx)
         movl  VCPU_nmi_addr(%rbx),%eax
         testl %eax,%eax
-        jz    compat_test_all_events
-        btsq  $_VCPUF_nmi_masked,VCPU_flags(%rbx)
-        jc    1f
+        jz    compat_test_guest_events
+        movb  $1,VCPU_nmi_masked(%rbx)
         sti
         leaq  VCPU_trap_bounce(%rbx),%rdx
         movl  %eax,TRAPBOUNCE_eip(%rdx)
@@ -128,9 +130,6 @@ compat_process_nmi:
         movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
-1:
-        btsq  $_VCPUF_nmi_pending,VCPU_flags(%rbx)
-        jmp   compat_test_guest_events
 
 compat_bad_hypercall:
         movl $-ENOSYS,UREGS_rax(%rsp)
index 0172e846c4091c5294c74502adc2984bae86a765..f9a983d5d22653f81b4628b5196b9c494d5f9629 100644 (file)
@@ -118,7 +118,7 @@ unsigned int compat_iret(void)
         regs->_esp += 16;
 
     /* No longer in NMI context. */
-    clear_bit(_VCPUF_nmi_masked, &current->vcpu_flags);
+    current->nmi_masked = 0;
 
     /* Restore upcall mask from supplied EFLAGS.IF. */
     vcpu_info(current, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
index a69ac53de2d76a20effead5a5757961ac8875aed..c921579e3c8fba94ff36a2aecf2f8c874d49aea9 100644 (file)
@@ -177,8 +177,8 @@ test_all_events:
         leaq  irq_stat(%rip),%rcx
         testl $~0,(%rcx,%rax,1)
         jnz   process_softirqs
-        btr   $_VCPUF_nmi_pending,VCPU_flags(%rbx)
-        jc    process_nmi
+        testb $1,VCPU_nmi_pending(%rbx)
+        jnz   process_nmi
 test_guest_events:
         movq  VCPU_vcpu_info(%rbx),%rax
         testb $0xFF,VCPUINFO_upcall_mask(%rax)
@@ -204,19 +204,19 @@ process_softirqs:
         ALIGN
 /* %rbx: struct vcpu */
 process_nmi:
+        testb $1,VCPU_nmi_masked(%rbx)
+        jnz  test_guest_events
+        movb $0,VCPU_nmi_pending(%rbx)
         movq VCPU_nmi_addr(%rbx),%rax
         test %rax,%rax
-        jz   test_all_events
-        bts  $_VCPUF_nmi_masked,VCPU_flags(%rbx)
-        jc   1f
+        jz   test_guest_events
+        movb $1,VCPU_nmi_masked(%rbx)
         sti
         leaq VCPU_trap_bounce(%rbx),%rdx
         movq %rax,TRAPBOUNCE_eip(%rdx)
         movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
         call create_bounce_frame
         jmp  test_all_events
-1:      bts  $_VCPUF_nmi_pending,VCPU_flags(%rbx)
-        jmp  test_guest_events
 
 bad_hypercall:
         movq $-ENOSYS,UREGS_rax(%rsp)
index 7a255c291fcb3091533e00664bcdfef9c9071ddc..dbdb47b65ea0f956d3b58a07a8fd2a5592b2084d 100644 (file)
@@ -231,7 +231,7 @@ unsigned long do_iret(void)
     }
 
     /* No longer in NMI context. */
-    clear_bit(_VCPUF_nmi_masked, &current->vcpu_flags);
+    current->nmi_masked = 0;
 
     /* Restore upcall mask from supplied EFLAGS.IF. */
     vcpu_info(current, evtchn_upcall_mask) = !(iret_saved.rflags & EF_IE);
index 1b3e68bd0e5c7f7fd69c9c54841cff1c14a43fa0..47e7da73b30f24fc7f28781f446e91ad599128a7 100644 (file)
@@ -95,7 +95,6 @@ struct vcpu *alloc_vcpu(
 
     v->domain = d;
     v->vcpu_id = vcpu_id;
-    spin_lock_init(&v->pause_lock);
 
     v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
     v->runstate.state_entry_time = NOW();
@@ -407,40 +406,23 @@ void domain_destroy(struct domain *d)
     call_rcu(&d->rcu, complete_domain_destroy);
 }
 
-static void vcpu_pause_setup(struct vcpu *v)
-{
-    spin_lock(&v->pause_lock);
-    if ( v->pause_count++ == 0 )
-        set_bit(_VCPUF_paused, &v->vcpu_flags);
-    spin_unlock(&v->pause_lock);
-}
-
 void vcpu_pause(struct vcpu *v)
 {
     ASSERT(v != current);
-    vcpu_pause_setup(v);
+    atomic_inc(&v->pause_count);
     vcpu_sleep_sync(v);
 }
 
 void vcpu_pause_nosync(struct vcpu *v)
 {
-    vcpu_pause_setup(v);
+    atomic_inc(&v->pause_count);
     vcpu_sleep_nosync(v);
 }
 
 void vcpu_unpause(struct vcpu *v)
 {
-    int wake;
-
     ASSERT(v != current);
-
-    spin_lock(&v->pause_lock);
-    wake = (--v->pause_count == 0);
-    if ( wake )
-        clear_bit(_VCPUF_paused, &v->vcpu_flags);
-    spin_unlock(&v->pause_lock);
-
-    if ( wake )
+    if ( atomic_dec_and_test(&v->pause_count) )
         vcpu_wake(v);
 }
 
@@ -507,9 +489,9 @@ int vcpu_reset(struct vcpu *v)
     v->fpu_dirtied     = 0;
     v->is_polling      = 0;
     v->is_initialised  = 0;
+    v->nmi_pending     = 0;
+    v->nmi_masked      = 0;
     clear_bit(_VCPUF_blocked, &v->vcpu_flags);
-    clear_bit(_VCPUF_nmi_pending, &v->vcpu_flags);
-    clear_bit(_VCPUF_nmi_masked, &v->vcpu_flags);
 
  out:
     UNLOCK_BIGLOCK(v->domain);
index fa448b07a3a524ba9add1775b70c8157c50f8624..a4dbc618e04695f1393251384abdb1bfa572dd14 100644 (file)
@@ -256,7 +256,7 @@ long register_guest_nmi_callback(unsigned long address)
      * now.
      */
     if ( arch_get_nmi_reason(d) != 0 )
-        set_bit(_VCPUF_nmi_pending, &v->vcpu_flags);
+        v->nmi_pending = 1;
 #endif
 
     return 0;
index 7a1257ee99980a7764217df51e097db1d4fd0f3a..33071104e589b9302176c08b02779471fa5b0143 100644 (file)
@@ -110,11 +110,14 @@ struct vcpu
     bool_t           is_initialised;
     /* Currently running on a CPU? */
     bool_t           is_running;
+    /* NMI callback pending for this VCPU? */
+    bool_t           nmi_pending;
+    /* Avoid NMI reentry by allowing NMIs to be masked for short periods. */
+    bool_t           nmi_masked;
 
     unsigned long    vcpu_flags;
 
-    spinlock_t       pause_lock;
-    unsigned int     pause_count;
+    atomic_t         pause_count;
 
     u16              virq_to_evtchn[NR_VIRQS];
 
@@ -440,31 +443,18 @@ extern struct domain *domain_list;
  /* VCPU is offline. */
 #define _VCPUF_down            1
 #define VCPUF_down             (1UL<<_VCPUF_down)
- /* NMI callback pending for this VCPU? */
-#define _VCPUF_nmi_pending     2
-#define VCPUF_nmi_pending      (1UL<<_VCPUF_nmi_pending)
- /* Avoid NMI reentry by allowing NMIs to be masked for short periods. */
-#define _VCPUF_nmi_masked      3
-#define VCPUF_nmi_masked       (1UL<<_VCPUF_nmi_masked)
- /* VCPU is paused by the hypervisor? */
-#define _VCPUF_paused          4
-#define VCPUF_paused           (1UL<<_VCPUF_paused)
  /* VCPU is blocked awaiting an event to be consumed by Xen. */
-#define _VCPUF_blocked_in_xen  5
+#define _VCPUF_blocked_in_xen  2
 #define VCPUF_blocked_in_xen   (1UL<<_VCPUF_blocked_in_xen)
  /* VCPU affinity has changed: migrating to a new CPU. */
-#define _VCPUF_migrating       6
+#define _VCPUF_migrating       3
 #define VCPUF_migrating        (1UL<<_VCPUF_migrating)
 
 static inline int vcpu_runnable(struct vcpu *v)
 {
-    return ( !(v->vcpu_flags &
-               ( VCPUF_blocked |
-                 VCPUF_down |
-                 VCPUF_paused |
-                 VCPUF_blocked_in_xen |
-                 VCPUF_migrating )) &&
-             (atomic_read(&v->domain->pause_count) == 0) );
+    return (!v->vcpu_flags &&
+            !atomic_read(&v->pause_count) &&
+            !atomic_read(&v->domain->pause_count));
 }
 
 void vcpu_pause(struct vcpu *v);